From: Wei Huang Date: Mon, 9 May 2011 10:38:30 +0000 (+0100) Subject: x86/fpu: clean up FPU context restore function X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22Dat/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22Dat?a=commitdiff_plain;h=3eb0faa4f80653d56e0e74d68872f6d73de23937;p=xen.git x86/fpu: clean up FPU context restore function This patch cleans up context restore function. It renames the function name to vcpu_restore_fpu(). It also extracts FPU restore code (frstor, fxrstor, xrstor) out into separate functions. vcpu_restore_fpu() will dispatch to these functions depending on CPU's capability. Signed-off-by: Wei Huang --- diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 2d5bbfa7cc..0ed05cbcc1 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -348,7 +348,7 @@ static void svm_fpu_enter(struct vcpu *v) { struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; - setup_fpu(v); + vcpu_restore_fpu(v); vmcb_set_exception_intercepts( vmcb, vmcb_get_exception_intercepts(vmcb) & ~(1U << TRAP_no_device)); } diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 0d291efe4b..b9aa49b599 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -612,7 +612,7 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) static void vmx_fpu_enter(struct vcpu *v) { - setup_fpu(v); + vcpu_restore_fpu(v); v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device); vmx_update_exception_bitmap(v); v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS; diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c index d57c847dcc..fc0c1c45f4 100644 --- a/xen/arch/x86/i387.c +++ b/xen/arch/x86/i387.c @@ -17,56 +17,37 @@ #include #include -static void load_mxcsr(unsigned long val) +#define MXCSR_DEFAULT 0x1f80 +static void fpu_init(void) { - val &= 0xffbf; - asm volatile ( "ldmxcsr %0" : : "m" (val) ); -} - -static void init_fpu(void); -static void 
restore_fpu(struct vcpu *v); - -void setup_fpu(struct vcpu *v) -{ - ASSERT(!is_idle_vcpu(v)); - - /* Avoid recursion. */ - clts(); - - if ( v->fpu_dirtied ) - return; - - if ( xsave_enabled(v) ) - { - /* - * XCR0 normally represents what guest OS set. In case of Xen itself, - * we set all supported feature mask before doing save/restore. - */ - set_xcr0(v->arch.xcr0_accum); - xrstor(v); - set_xcr0(v->arch.xcr0); - } - else if ( v->fpu_initialised ) - { - restore_fpu(v); - } - else + unsigned long val; + + asm volatile ( "fninit" ); + if ( cpu_has_xmm ) { - init_fpu(); + /* load default value into MXCSR control/status register */ + val = MXCSR_DEFAULT; + asm volatile ( "ldmxcsr %0" : : "m" (val) ); } - - v->fpu_initialised = 1; - v->fpu_dirtied = 1; } -static void init_fpu(void) +/*******************************/ +/* FPU Restore Functions */ +/*******************************/ +/* Restore x87 extended state */ +static inline void fpu_xrstor(struct vcpu *v) { - asm volatile ( "fninit" ); - if ( cpu_has_xmm ) - load_mxcsr(0x1f80); + /* + * XCR0 normally represents what guest OS set. In case of Xen itself, + * we set all supported feature mask before doing save/restore. + */ + set_xcr0(v->arch.xcr0_accum); + xrstor(v); + set_xcr0(v->arch.xcr0); } -static void restore_fpu(struct vcpu *v) +/* Restore x87 FPU, MMX, SSE and SSE2 state */ +static inline void fpu_fxrstor(struct vcpu *v) { const char *fpu_ctxt = v->arch.fpu_ctxt; @@ -75,41 +56,42 @@ static void restore_fpu(struct vcpu *v) * possibility, which may occur if the block was passed to us by control * tools, by silently clearing the block. */ - if ( cpu_has_fxsr ) - { - asm volatile ( #ifdef __i386__ - "1: fxrstor %0 \n" + "1: fxrstor %0 \n" #else /* __x86_64__ */ - /* See above for why the operands/constraints are this way. 
*/ + "1: " REX64_PREFIX "fxrstor (%2)\n" #endif - ".section .fixup,\"ax\" \n" - "2: push %%"__OP"ax \n" - " push %%"__OP"cx \n" - " push %%"__OP"di \n" - " lea %0,%%"__OP"di \n" - " mov %1,%%ecx \n" - " xor %%eax,%%eax \n" - " rep ; stosl \n" - " pop %%"__OP"di \n" - " pop %%"__OP"cx \n" - " pop %%"__OP"ax \n" - " jmp 1b \n" - ".previous \n" - _ASM_EXTABLE(1b, 2b) - : - : "m" (*fpu_ctxt), - "i" (sizeof(v->arch.xsave_area->fpu_sse)/4) + ".section .fixup,\"ax\" \n" + "2: push %%"__OP"ax \n" + " push %%"__OP"cx \n" + " push %%"__OP"di \n" + " lea %0,%%"__OP"di \n" + " mov %1,%%ecx \n" + " xor %%eax,%%eax \n" + " rep ; stosl \n" + " pop %%"__OP"di \n" + " pop %%"__OP"cx \n" + " pop %%"__OP"ax \n" + " jmp 1b \n" + ".previous \n" + _ASM_EXTABLE(1b, 2b) + : + : "m" (*fpu_ctxt), + "i" (sizeof(v->arch.xsave_area->fpu_sse)/4) #ifdef __x86_64__ - ,"cdaSDb" (fpu_ctxt) + ,"cdaSDb" (fpu_ctxt) #endif - ); - } - else - { - asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) ); - } + ); +} + +/* Restore x87 FPU state */ +static inline void fpu_frstor(struct vcpu *v) +{ + const char *fpu_ctxt = v->arch.fpu_ctxt; + + asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) ); } /*******************************/ @@ -178,6 +160,35 @@ static inline void fpu_fsave(struct vcpu *v) /*******************************/ /* VCPU FPU Functions */ /*******************************/ +/* + * Restore FPU state when #NM is triggered. + */ +void vcpu_restore_fpu(struct vcpu *v) +{ + ASSERT(!is_idle_vcpu(v)); + + /* Avoid recursion. */ + clts(); + + if ( v->fpu_dirtied ) + return; + + if ( xsave_enabled(v) ) + fpu_xrstor(v); + else if ( v->fpu_initialised ) + { + if ( cpu_has_fxsr ) + fpu_fxrstor(v); + else + fpu_frstor(v); + } + else + fpu_init(); + + v->fpu_initialised = 1; + v->fpu_dirtied = 1; +} + /* * On each context switch, save the necessary FPU info of VCPU being switch * out. It dispatches saving operation based on CPU's capability. 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 0e2e14773e..b57bbf56ca 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -3198,7 +3198,7 @@ asmlinkage void do_device_not_available(struct cpu_user_regs *regs) BUG_ON(!guest_mode(regs)); - setup_fpu(curr); + vcpu_restore_fpu(curr); if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS ) { diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h index d85713a89c..f3a7df0229 100644 --- a/xen/include/asm-x86/i387.h +++ b/xen/include/asm-x86/i387.h @@ -14,7 +14,7 @@ #include #include -void setup_fpu(struct vcpu *v); +void vcpu_restore_fpu(struct vcpu *v); void vcpu_save_fpu(struct vcpu *v); int vcpu_init_fpu(struct vcpu *v);